#include "cpu.h"
+int start_svm(struct cpuinfo_x86 *c);
+
/*
* amd_flush_filter={on,off}. Forcibly Enable or disable the TLB flush
* filter on AMD 64-bit processors.
if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
disable_c1_ramping();
- start_svm();
+ start_svm(c);
}
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
subdir-$(x86_32) += x86_32
subdir-$(x86_64) += x86_64
+obj-y += asid.o
obj-y += emulate.o
obj-y += intr.o
obj-y += svm.o
--- /dev/null
+/*
+ * asid.c: handling ASIDs in SVM.
+ * Copyright (c) 2007, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/perfc.h>
+#include <asm/hvm/svm/asid.h>
+
+/*
+ * This is the interface to SVM's ASID management. ASIDs partition the
+ * physical TLB for SVM. In the current implementation ASIDs are introduced
+ * to reduce the number of TLB flushes. Each time the guest's virtual
+ * address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4} operation),
+ * instead of flushing the TLB, a new ASID is assigned. This reduces the
+ * number of TLB flushes to at most 1/#ASIDs (currently 1/64). The biggest
+ * advantage is that hot parts of the hypervisor's code and data retain in
+ * the TLB.
+ *
+ * Sketch of the Implementation:
+ *
+ * ASIDs are a CPU-local resource. As preemption of ASIDs is not possible,
+ * ASIDs are assigned in a round-robin scheme. To minimize the overhead of
+ * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
+ * 64-bit generation. Only on a generation overflow does the code need to
+ * invalidate all ASID information stored in the VCPUs which are run on the
+ * specific physical processor. This overflow occurs after about 2^80
+ * host processor cycles, so we do not optimize this case, but simply disable
+ * ASID usage to retain correctness.
+ */
+
+/* usable guest asids [ 1 .. get_max_asid() ) */
+#define SVM_ASID_FIRST_GUEST_ASID 1
+
+#define SVM_ASID_FIRST_GENERATION 0
+
+/* triggers the flush of all generations on all VCPUs */
+#define SVM_ASID_LAST_GENERATION (0xfffffffffffffffd)
+
+/* triggers assignment of new ASID to a VCPU */
+#define SVM_ASID_INVALID_GENERATION (SVM_ASID_LAST_GENERATION + 1)
+
+/* Per-CPU ASID management state (one instance per physical core). */
+struct svm_asid_data {
+   u64 core_asid_generation; /* generation tag of currently valid ASIDs */
+   u32 next_asid;            /* next ASID to hand out, round-robin */
+   u32 max_asid;             /* highest usable guest ASID on this core */
+   u32 erratum170;           /* nonzero: ASIDs disabled, flush on every VMRUN */
+};
+
+static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
+
+/*
+ * Get handle to the CPU-local ASID management data of the current
+ * physical processor.
+ */
+static struct svm_asid_data *svm_asid_core_data(void)
+{
+    return &get_cpu_var(svm_asid_data);
+}
+
+/*
+ * Init ASID management for the current physical CPU: probe the number of
+ * supported ASIDs, decide whether erratum #170 forces ASIDs off, and reset
+ * the per-core generation and round-robin allocation state.
+ */
+void svm_asid_init(struct cpuinfo_x86 *c)
+{
+    int nasids;
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    /* Find #ASID (CPUID leaf 0x8000000A, SVM revision and feature id). */
+    nasids = cpuid_ebx(0x8000000A);
+    data->max_asid = nasids - 1;
+
+    /* Check if we can use ASIDs: only family 0x10, or family 0xf parts at
+       or above model 0x68 stepping 1, are free of erratum #170. */
+    data->erratum170 =
+        !((c->x86 == 0x10) ||
+          ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)));
+
+    printk("AMD SVM: ASIDs %s \n",
+           (data->erratum170 ? "disabled." : "enabled."));
+
+    /* Initialize ASID assignment. */
+    if ( data->erratum170 )
+    {
+        /* On errata #170, VCPUs and phys processors should have same
+           generation. We set both to invalid. */
+        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
+    }
+    else
+    {
+        data->core_asid_generation = SVM_ASID_FIRST_GENERATION;
+    }
+
+    /* ASIDs are assigned round-robin. Start with the first. */
+    data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
+}
+
+/*
+ * Force VCPU to fetch a new ASID on its next VMRUN on this core.
+ * Also (re-)initializes the VMCB's TLB-control fields, since those are
+ * fixed for the lifetime of the VCPU on erratum-#170 cores.
+ */
+void svm_asid_init_vcpu(struct vcpu *v)
+{
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    /* Trigger assignment of a new ASID. */
+    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
+
+    /*
+     * This erratum is bound to a physical processor. The tlb_control
+     * field is not changed by the processor. We only set tlb_control
+     * on VMCB creation and on a migration.
+     */
+    if ( data->erratum170 )
+    {
+        /* Flush TLB every VMRUN to handle Errata #170. */
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        /* All guests use same ASID. */
+        v->arch.hvm_svm.vmcb->guest_asid = 1;
+    }
+    else
+    {
+        /* These fields are handled on VMRUN (see svm_asid_handle_vmrun). */
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
+        v->arch.hvm_svm.vmcb->guest_asid = 0;
+    }
+}
+
+/*
+ * Increase the generation to make fresh ASIDs available, flush the physical
+ * TLB, and give the VCPU the first ASID of the new generation. Called when
+ * the current generation's ASID pool is exhausted.
+ */
+static void svm_asid_handle_inc_generation(struct vcpu *v)
+{
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
+    {
+        /* Move to the next generation; all previously handed-out ASIDs
+           become stale because their generation tag no longer matches. */
+        data->core_asid_generation++;
+        data->next_asid = SVM_ASID_FIRST_GUEST_ASID + 1;
+
+        /* Assign this VCPU the first ASID of the new generation. */
+        v->arch.hvm_svm.vmcb->guest_asid = SVM_ASID_FIRST_GUEST_ASID;
+        v->arch.hvm_svm.asid_generation = data->core_asid_generation;
+
+        /* Trigger flush of physical TLB. */
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        return;
+    }
+
+    /*
+     * ASID generations are 64 bit. Overflow of generations never happens.
+     * For safety, we simply disable ASIDs and switch to erratum #170 mode on
+     * this core (flushing TLB always). So correctness is established; it
+     * only runs a bit slower.
+     */
+    printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
+    data->erratum170 = 1;
+    data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
+
+    svm_asid_init_vcpu(v);
+}
+
+/*
+ * Called directly before VMRUN. Checks if the VCPU needs a new ASID,
+ * assigns it, and if required, issues required TLB flushes.
+ */
+asmlinkage void svm_asid_handle_vmrun(void)
+{
+    struct vcpu *v = current;
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    /* On erratum #170 systems we must flush the TLB.
+     * Generation overruns are taken here, too, because an overrun switches
+     * the core into erratum-#170 mode (see svm_asid_handle_inc_generation). */
+    if ( data->erratum170 )
+    {
+        v->arch.hvm_svm.vmcb->guest_asid = 1;
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        return;
+    }
+
+    /* Test if VCPU has valid ASID (generation tag matches the core's). */
+    if ( likely(v->arch.hvm_svm.asid_generation ==
+                data->core_asid_generation) )
+    {
+        /* May revert previous TLB-flush command. */
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
+        return;
+    }
+
+    /* Different ASID generations trigger fetching of a fresh ASID. */
+    if ( likely(data->next_asid <= data->max_asid) )
+    {
+        /* There is a free ASID; no TLB flush needed. */
+        v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
+        v->arch.hvm_svm.asid_generation = data->core_asid_generation;
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
+        return;
+    }
+
+    /* Slow path: generation exhausted, may cause TLB flush. */
+    svm_asid_handle_inc_generation(v);
+}
+
+/* Invalidate the VCPU's ASID: forces a fresh ASID on the next VMRUN. */
+void svm_asid_inv_asid(struct vcpu *v)
+{
+    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
/*
* svm.c: handling SVM architecture-related VM exits
* Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/io.h>
+#include <asm/hvm/svm/asid.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/vmcb.h>
#include <asm/hvm/svm/emulate.h>
}
paging_update_paging_modes(v);
+ /* signal paging update to ASID handler */
+ svm_asid_g_update_paging (v);
+
return 0;
bad_cr3:
{
v->arch.hvm_svm.launch_core = smp_processor_id();
hvm_migrate_timers(v);
+
+ /* Migrating to another ASID domain. Request a new ASID. */
+ svm_asid_init_vcpu(v);
}
hvm_do_resume(v);
}
}
-int start_svm(void)
+int start_svm(struct cpuinfo_x86 *c)
{
u32 eax, ecx, edx;
u32 phys_hsa_lo, phys_hsa_hi;
if ( !(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)) )
return 0;
- /* check whether SVM feature is disabled in BIOS */
+ /* Check whether SVM feature is disabled in BIOS */
rdmsr(MSR_K8_VM_CR, eax, edx);
if ( eax & K8_VMCR_SVME_DISABLE )
{
svm_npt_detect();
- /* Initialize the HSA for this core */
+ /* Initialize the HSA for this core. */
phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
phys_hsa_lo = (u32) phys_hsa;
phys_hsa_hi = (u32) (phys_hsa >> 32);
wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
+ /* Initialize core's ASID handling. */
+ svm_asid_init(c);
+
if ( cpu != 0 )
return 1;
vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
if ( (value ^ old_value) & X86_CR0_PG )
+ {
paging_update_paging_modes(v);
+ /* signal paging update to ASID handler */
+ svm_asid_g_update_paging (v);
+ }
return 1;
}
if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
goto bad_cr3;
paging_update_cr3(v);
+ /* signal paging update to ASID handler */
+ svm_asid_g_mov_to_cr3 (v);
}
else
{
v->arch.hvm_svm.cpu_cr3 = value;
update_cr3(v);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
+ /* signal paging update to ASID handler */
+ svm_asid_g_mov_to_cr3 (v);
}
break;
{
vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 = value;
paging_update_paging_modes(v);
+ /* signal paging update to ASID handler */
+ svm_asid_g_update_paging (v);
break;
}
if ( old_base_mfn )
put_page(mfn_to_page(old_base_mfn));
paging_update_paging_modes(v);
+ /* signal paging update to ASID handler */
+ svm_asid_g_update_paging (v);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
* all TLB entries except global entries.
*/
if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
+ {
paging_update_paging_modes(v);
+ /* signal paging update to ASID handler */
+ svm_asid_g_update_paging (v);
+ }
break;
case 8:
HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
paging_invlpg(v, g_vaddr);
+ /* signal invlpg to ASID handler */
+ svm_asid_g_invlpg (v, g_vaddr);
}
/*
* vmcb.c: VMCB management
- * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
* Copyright (c) 2004, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
#include <asm/hvm/support.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
+#include <asm/hvm/svm/asid.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <xen/domain_page.h>
struct vmcb_struct *vmcb = arch_svm->vmcb;
svm_segment_attributes_t attrib;
- /* Always flush the TLB on VMRUN. All guests share a single ASID (1). */
- vmcb->tlb_control = 1;
- vmcb->guest_asid = 1;
+ /* TLB control, and ASID assignment. */
+ svm_asid_init_vcpu (v);
- /* SVM intercepts. */
vmcb->general1_intercepts =
GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI |
GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT |
/*
* exits.S: SVM architecture-specific exit handling.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
* Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, AMD Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
ENTRY(svm_asm_do_resume)
GET_CURRENT(%ebx)
CLGI
+ /* Run ASID stuff. */
+ call svm_asid_handle_vmrun
+
movl VCPU_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
testl $~0,irq_stat(%eax,1)
/*
* exits.S: AMD-V architecture-specific exit handling.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
* Copyright (c) 2004, Intel Corporation.
- * Copyright (c) 2005, AMD Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
ENTRY(svm_asm_do_resume)
GET_CURRENT(%rbx)
CLGI
+ /* Run ASID stuff. */
+ call svm_asid_handle_vmrun
+
movl VCPU_processor(%rbx),%eax
shl $IRQSTAT_shift,%rax
leaq irq_stat(%rip),%rdx
--- /dev/null
+/*
+ * asid.h: handling ASIDs in SVM.
+ * Copyright (c) 2007, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __ASM_X86_HVM_SVM_ASID_H__
+#define __ASM_X86_HVM_SVM_ASID_H__
+
+#include <xen/config.h>
+#include <asm/types.h>
+#include <asm/hvm/hvm.h>
+#include <asm/hvm/support.h>
+#include <asm/hvm/svm/svm.h>
+#include <asm/hvm/svm/vmcb.h>
+#include <asm/percpu.h>
+
+void svm_asid_init(struct cpuinfo_x86 *c);
+void svm_asid_init_vcpu(struct vcpu *v);
+void svm_asid_inv_asid(struct vcpu *v);
+
+/*
+ * ASID related, guest triggered events.
+ */
+
+/* Guest changed its paging mode (CR0/CR4 paging bits): old TLB entries are
+ * stale, so invalidate the ASID to get a fresh one instead of flushing. */
+static inline void svm_asid_g_update_paging(struct vcpu *v)
+{
+    svm_asid_inv_asid(v);
+}
+
+/* Guest loaded CR3 (address-space switch): take a new ASID rather than
+ * flushing the TLB. */
+static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
+{
+    svm_asid_inv_asid(v);
+}
+
+/* Guest executed INVLPG on g_vaddr: currently handled by taking a whole
+ * new ASID rather than invalidating the single entry. */
+static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
+{
+#if 0
+    /* Optimization? Bytes 0x0F,0x01,0xDF are presumably INVLPGA
+       (invalidate g_vaddr in EAX for the ASID in ECX) — unverified. */
+    asm volatile (".byte 0x0F,0x01,0xDF \n"
+                  : /* output */
+                  : /* input */
+                  "a" (g_vaddr), "c"(v->arch.hvm_svm.vmcb->guest_asid) );
+#endif
+
+    /* Safe fallback. Take a new ASID. */
+    svm_asid_inv_asid(v);
+}
+
+#endif /* __ASM_X86_HVM_SVM_ASID_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
/*
* vmcb.h: VMCB related definitions
- * Copyright (c) 2005, AMD Corporation.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
* Copyright (c) 2004, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
#include <asm/config.h>
#include <asm/hvm/hvm.h>
-int start_svm(void);
/* general 1 intercepts */
enum GenericIntercept1bits
struct arch_svm_struct {
struct vmcb_struct *vmcb;
u64 vmcb_pa;
+ u64 asid_generation; /* ASID tracking, moved here to
+ prevent cacheline misses. */
u32 *msrpm;
int launch_core;
bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */